Finally, a full, working, and tested Xeno with userspace domain building... Go and boot whatever :)
curr = p->pg_head;
*list++ = p->pg_head;
page = (frame_table + p->pg_head)->next;
- printk(KERN_ALERT "bd240 debug: list %lx, page num %lx\n", list, page);
while(page != p->pg_head){
if(!((unsigned long)list & (PAGE_SIZE-1))){
- printk(KERN_ALERT "bd240 debug: list %lx, page num %lx\n", list, page);
curr = (frame_table + curr)->next;
+ unmap_domain_mem((unsigned long)(list-1) & PAGE_MASK);
list = (unsigned long *)map_domain_mem(curr << PAGE_SHIFT);
}
*list++ = page;
page = (frame_table + page)->next;
}
+ unmap_domain_mem((unsigned long)(list-1) & PAGE_MASK);
}
long do_dom0_op(dom0_op_t *u_dom0_op)
* needs to be allocated
*/
if(dom != 0){
+
if(alloc_new_dom_mem(p, op.u.newdomain.memory_kb) != 0){
ret = -1;
break;
}
build_page_list(p);
+
ret = p->domain;
op.u.newdomain.domain = ret;
op.u.newdomain.pg_head = p->pg_head;
copy_to_user(u_dom0_op, &op, sizeof(op));
- printk(KERN_ALERT "bd240 debug: hyp dom0_ops: %lx, %d\n", op.u.newdomain.pg_head, op.u.newdomain.memory_kb);
break;
}
temp = temp->next;
list_del(&pf->list);
pf->next = pf->prev = p->pg_head = (pf - frame_table);
+ pf->type_count = pf->tot_count = 0;
free_pfns--;
pf_head = pf;
pf->prev = pf_head->prev;
(frame_table + pf_head->prev)->next = (pf - frame_table);
pf_head->prev = (pf - frame_table);
+ pf->type_count = pf->tot_count = 0;
free_pfns--;
}
start_info_t * virt_startinfo_addr;
unsigned long virt_stack_addr;
unsigned long long time;
- unsigned long phys_l1tab, phys_l2tab;
+ unsigned long phys_l2tab;
page_update_request_t * pgt_updates;
unsigned long curr_update_phys;
unsigned long count;
pgt_updates = (page_update_request_t *)map_domain_mem(curr_update_phys);
}
}
+ unmap_domain_mem((void *)((unsigned long)(pgt_updates-1) & PAGE_MASK));
/* entries 0xe0000000 onwards in page table must contain hypervisor
* mem mappings - set them up.
phys_l2tab = pagetable_val(p->mm.pagetable);
l2tab = map_domain_mem(phys_l2tab);
l2tab += l2_table_offset(meminfo->virt_shinfo_addr);
- phys_l1tab = l2_pgentry_to_phys(*l2tab) +
- (l1_table_offset(meminfo->virt_shinfo_addr) * sizeof(l1_pgentry_t));
- l1tab = map_domain_mem(phys_l1tab);
+ l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+ l1tab += l1_table_offset(meminfo->virt_shinfo_addr);
*l1tab = mk_l1_pgentry(__pa(p->shared_info) | L1_PROT);
- unmap_domain_mem(l2tab);
- unmap_domain_mem(l1tab);
+ unmap_domain_mem((void *)((unsigned long)l2tab & PAGE_MASK));
+ unmap_domain_mem((void *)((unsigned long)l1tab & PAGE_MASK));
/* set up the shared info structure */
rdtscll(time);
__asm__ __volatile__ (
"mov %%eax,%%cr3" : : "a" (pagetable_val(current->mm.pagetable)));
__sti();
-
-
+
new_thread(p,
(unsigned long)meminfo->virt_load_addr,
(unsigned long)virt_stack_addr,
unsigned long alloc_index;
unsigned long ft_pages;
l2_pgentry_t *l2tab, *l2start;
- l1_pgentry_t *l1tab = NULL;
+ l1_pgentry_t *l1tab = NULL, *l1start = NULL;
struct pfn_info *page = NULL;
net_ring_t *net_ring;
net_vif_t *net_vif;
* and frame table struct.
*/
- ft_pages = (frame_table_size + (PAGE_SIZE - 1)) << PAGE_SHIFT;
+ ft_pages = frame_table_size >> PAGE_SHIFT;
l2tab += l2_table_offset(virt_load_address);
cur_address = p->pg_head << PAGE_SHIFT;
for ( count = 0;
- count < p->tot_pages + 1;
+ count < p->tot_pages + 1 + ft_pages;
count++)
{
if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
{
- if ( l1tab != NULL ) unmap_domain_mem(l1tab-1);
+ if ( l1tab != NULL ) unmap_domain_mem(l1start);
phys_l1tab = alloc_page_from_domain(&alloc_address, &alloc_index);
*l2tab++ = mk_l2_pgentry(phys_l1tab|L2_PROT);
- l1tab = map_domain_mem(phys_l1tab);
+ l1start = l1tab = map_domain_mem(phys_l1tab);
clear_page(l1tab);
l1tab += l1_table_offset(
virt_load_address + (count << PAGE_SHIFT));
}
+ *l1tab++ = mk_l1_pgentry(cur_address|L1_PROT);
- if( count < alloc_index )
+ if(count < p->tot_pages)
{
- *l1tab++ = mk_l1_pgentry(cur_address|L1_PROT);
page = frame_table + (cur_address >> PAGE_SHIFT);
page->flags = dom | PGT_writeable_page;
page->type_count = page->tot_count = 1;
- }
- else
- {
- *l1tab++ = mk_l1_pgentry((cur_address|L1_PROT) & ~_PAGE_RW);
- page = frame_table + (cur_address >> PAGE_SHIFT);
- page->flags = dom | PGT_l1_page_table;
- page->type_count = 1;
- page->tot_count = 2;
}
cur_address = ((frame_table + (cur_address >> PAGE_SHIFT))->next) << PAGE_SHIFT;
}
- unmap_domain_mem(l1tab-1);
- page = frame_table + (frame_table + p->pg_head)->prev;
+ unmap_domain_mem(l1start);
+
+ /* pages that are part of page tables must be read only */
+ cur_address = p->pg_head << PAGE_SHIFT;
+ for(count = 0;
+ count < alloc_index;
+ count++){
+ cur_address = ((frame_table + (cur_address >> PAGE_SHIFT))->next) << PAGE_SHIFT;
+ }
+
+ l2tab = l2start + l2_table_offset(virt_load_address +
+ (alloc_index << PAGE_SHIFT));
+ l1start = l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+ l1tab += l1_table_offset(virt_load_address + (alloc_index << PAGE_SHIFT));
+ l2tab++;
+ for(count = alloc_index;
+ count < p->tot_pages;
+ count++){
+ *l1tab++ = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
+ if(!((unsigned long)l1tab & (PAGE_SIZE - 1))){
+ unmap_domain_mem(l1start);
+ l1start = l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+ l2tab++;
+ }
+ page = frame_table + (cur_address >> PAGE_SHIFT);
+ page->flags = dom | PGT_l1_page_table;
+ page->tot_count++;
+
+ cur_address = ((frame_table + (cur_address >> PAGE_SHIFT))->next) << PAGE_SHIFT;
+ }
page->flags = dom | PGT_l2_page_table;
+ unmap_domain_mem(l1start);
/* Map in the the shared info structure. */
virt_shinfo_address = virt_load_address + (p->tot_pages << PAGE_SHIFT);
l2tab = l2start + l2_table_offset(virt_shinfo_address);
- l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+ l1start = l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
l1tab += l1_table_offset(virt_shinfo_address);
*l1tab = mk_l1_pgentry(__pa(p->shared_info)|L1_PROT);
- unmap_domain_mem(l1tab);
+ unmap_domain_mem(l1start);
/* Set up shared info area. */
rdtscll(time);
cur_address < virt_ftable_end;
cur_address += PAGE_SIZE){
l2tab = l2start + l2_table_offset(cur_address);
- l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+ l1start = l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
l1tab += l1_table_offset(cur_address);
*l1tab = mk_l1_pgentry(__pa(ft_mapping)|L1_PROT);
- unmap_domain_mem(l1tab);
+ unmap_domain_mem(l1start);
ft_mapping += PAGE_SIZE;
}
return 0;
}
-
void __init domain_init(void)
{
int i;
schedule_data[i].curr = &idle0_task;
}
}
-
-
-
-#if 0
- unsigned long s = (mod[ 0].mod_start + (PAGE_SIZE-1)) & PAGE_MASK;
- unsigned long e = (mod[nr_mods-1].mod_end + (PAGE_SIZE-1)) & PAGE_MASK;
- while ( s != e )
- {
- free_pages((unsigned long)__va(s), 0);
- s += PAGE_SIZE;
- }
-#endif
-
flags & PG_type_mask, type, page_type_count(page));
return -1;
}
+
page->flags |= type;
}
{
l2_pgentry_t *p_l2_entry, l2_entry;
int i, ret=0;
-
+
ret = inc_page_refcnt(page_nr, PGT_l2_page_table);
if ( ret != 0 ) return (ret < 0) ? ret : 0;
if ( ret ) ret = get_twisted_l2_table(page_nr, l2_entry);
if ( ret ) goto out;
}
-
+
/* Now we simply slap in our high mapping. */
memcpy(p_l2_entry,
idle_pg_table[smp_processor_id()] + DOMAIN_ENTRIES_PER_L2_PAGETABLE,
(_PAGE_GLOBAL|_PAGE_PAT)) )
{
- printk(KERN_ALERT "bd240 debug: bad l1 entry val %lx\n", l1_pgentry_val(new_l1_entry) & (_PAGE_GLOBAL | _PAGE_PAT));
-
MEM_LOG("Bad L1 entry val %04lx",
l1_pgentry_val(new_l1_entry) &
(_PAGE_GLOBAL|_PAGE_PAT));
if ( get_page(l1_pgentry_to_pagenr(new_l1_entry),
l1_pgentry_val(new_l1_entry) & _PAGE_RW) ){
- printk(KERN_ALERT "bd240 debug: get_page err\n");
goto fail;
}
}
break;
- /*
- * PGREQ_UNCHECKED_UPDATE: Make an unchecked update to a
- * bottom-level page-table entry.
- * Restrictions apply:
- * 1. Update only allowed by domain 0.
- * 2. Update must be to a level-1 pte belonging to dom0.
- */
- case PGREQ_UNCHECKED_UPDATE:
- cur->ptr &= ~(sizeof(l1_pgentry_t) - 1);
- page = frame_table + pfn;
- flags = page->flags;
- if ( (flags | current->domain) == PGT_l1_page_table )
- {
-
- *(unsigned long *)map_domain_mem(cur->ptr) = cur->val;
- err = 0;
- }
- else
- {
- MEM_LOG("UNCHECKED_UPDATE: Bad domain %d, or"
- " bad pte type %08lx", current->domain, flags);
- }
- break;
-
/*
* PGREQ_EXTENDED_COMMAND: Extended command is specified
* in the least-siginificant bits of the 'value' field.
if ( err )
{
- page = frame_table + (cur->ptr >> PAGE_SHIFT);
kill_domain_with_errmsg("Illegal page update request");
}
+
/******************************************************************************
* dom0_core.c
*
#define DOM_DIR "dom"
#define DOM_MEM "mem"
+#define MAP_DISCONT 1
+
frame_table_t * frame_table;
static struct proc_dir_entry *xeno_base;
static void create_proc_dom_entries(int dom)
{
struct proc_dir_entry * dir;
- struct proc_dir_entry * file;
dom_procdata_t * dom_data;
char dir_name[MAX_LEN];
/* remap the range using xen specific routines */
- printk(KERN_ALERT "bd240 debug: dmw entered %lx, %lx\n", mem_data->pfn, mem_data->tot_pages);
-
- addr = direct_mmap(mem_data->pfn << PAGE_SHIFT, mem_data->tot_pages << PAGE_SHIFT, prot, 0, 0);
+ addr = direct_mmap(mem_data->pfn << PAGE_SHIFT, mem_data->tot_pages << PAGE_SHIFT, prot, MAP_DISCONT, mem_data->tot_pages);
- printk(KERN_ALERT "bd240 debug: dmw exit %lx, %lx\n", mem_data->pfn, mem_data->tot_pages);
-
copy_to_user((unsigned long *)buff, &addr, sizeof(addr));
return sizeof(addr);
params->num_vifs = op.u.newdomain.num_vifs;
params->domain = op.u.newdomain.domain;
- printk(KERN_ALERT "bd240 debug: cmd_write: %lx, %d, %d\n", params->pg_head, params->memory_kb, params->domain);
-
/* now notify user space of the new domain's id */
new_dom_id = create_proc_entry(DOM0_NEWDOM, 0600, xeno_base);
if ( new_dom_id != NULL )
* management applications such as domain builder etc.
*/
-#define direct_set_pte(pteptr, pteval) queue_l1_entry_update(__pa(pteptr) | PGREQ_UNCHECKED_UPDATE, (pteval).pte_low)
+#define direct_set_pte(pteptr, pteval) queue_l1_entry_update(__pa(pteptr), (pteval).pte_low)
-#define direct_pte_clear(pteptr) queue_l1_entry_update(__pa(pteptr) | PGREQ_UNCHECKED_UPDATE, 0)
+#define direct_pte_clear(pteptr) queue_l1_entry_update(__pa(pteptr), 0)
#define __direct_pte(x) ((pte_t) { (x) } )
#define __direct_mk_pte(page_nr,pgprot) __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
do {
pte_t oldpage;
oldpage = ptep_get_and_clear(pte);
+
direct_set_pte(pte, direct_mk_pte_phys(phys_addr, prot));
forget_pte(oldpage);
* used for remapping discontiguous bits of domain's memory, pages to map are
* found from frame table beginning at the given first_pg index
*/
-int direct_remap_disc_page_range(unsigned long from, unsigned long first_pg,
- int tot_pages, pgprot_t prot)
+int direct_remap_disc_page_range(unsigned long from,
+ unsigned long first_pg, int tot_pages, pgprot_t prot)
{
frame_table_t * current_ft;
unsigned long current_pfn;
unsigned long start = from;
int count = 0;
- current_ft = (frame_table_t *)(frame_table + first_pg);
+ current_ft = frame_table + first_pg;
current_pfn = first_pg;
while(count < tot_pages){
- if(direct_remap_page_range(start, current_pfn << PAGE_SHIFT, PAGE_SIZE, prot))
+ if(direct_remap_page_range(start, current_pfn << PAGE_SHIFT,
+ PAGE_SIZE, prot))
goto out;
start += PAGE_SIZE;
current_pfn = current_ft->next;
/* and perform the mapping */
if(flag == MAP_DISCONT){
- printk(KERN_ALERT "bd240 debug: call direct_remap_disc_page_range\n");
- ret = direct_remap_disc_page_range(addr, phys_addr, tot_pages, prot);
+ ret = direct_remap_disc_page_range(addr, phys_addr >> PAGE_SHIFT,
+ tot_pages, prot);
} else {
- printk(KERN_ALERT "bd240 debug: call direct_remap_page_range\n");
ret = direct_remap_page_range(addr, phys_addr, size, prot);
}